[XEN] Fix p2m->shadow callback to pass the mfn being written to
author: Tim Deegan <Tim.Deegan@xensource.com>
Tue, 3 Jul 2007 13:57:59 +0000 (14:57 +0100)
committer: Tim Deegan <Tim.Deegan@xensource.com>
Tue, 3 Jul 2007 13:57:59 +0000 (14:57 +0100)
as well as the pointer and contents.  This was being calculated but
got disconnected from its use when the p2m and shadow functions were
separated.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
xen/arch/x86/mm/hap/hap.c
xen/arch/x86/mm/p2m.c
xen/arch/x86/mm/shadow/common.c
xen/arch/x86/mm/shadow/private.h
xen/include/asm-x86/paging.h

index 36f0348b14da741577fdaf5362b06dfcdff7c650..1d7abd6667ff7ef05190c1c168f5f40d741ade3b 100644 (file)
@@ -672,7 +672,7 @@ static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e)
 
 void 
 hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
-                    l1_pgentry_t new, unsigned int level)
+                    mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
 {
     hap_lock(v->domain);
 
index 4ec884aa2c5db4f364d17cf8fdd4787f38de1ada..9efb571cb37295f9d94a410952cab24f59c9a126 100644 (file)
@@ -146,17 +146,20 @@ p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table,
 
         switch ( type ) {
         case PGT_l3_page_table:
-            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 4);
+            paging_write_p2m_entry(d, gfn, 
+                                   p2m_entry, *table_mfn, new_entry, 4);
             break;
         case PGT_l2_page_table:
 #if CONFIG_PAGING_LEVELS == 3
             /* for PAE mode, PDPE only has PCD/PWT/P bits available */
             new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT);
 #endif
-            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 3);
+            paging_write_p2m_entry(d, gfn, 
+                                   p2m_entry, *table_mfn, new_entry, 3);
             break;
         case PGT_l1_page_table:
-            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 2);
+            paging_write_p2m_entry(d, gfn, 
+                                   p2m_entry, *table_mfn, new_entry, 2);
             break;
         default:
             BUG();
@@ -222,7 +225,7 @@ set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, u32 l1e_flags)
         entry_content = l1e_empty();
 
     /* level 1 entry */
-    paging_write_p2m_entry(d, gfn, p2m_entry, entry_content, 1);
+    paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
 
     /* Success */
     rv = 1;
@@ -707,6 +710,7 @@ void p2m_set_flags_global(struct domain *d, u32 l1e_flags)
     l1_pgentry_t l1e_content;
     l1_pgentry_t *l1e;
     l2_pgentry_t *l2e;
+    mfn_t l1mfn;
     int i1, i2;
 #if CONFIG_PAGING_LEVELS >= 3
     l3_pgentry_t *l3e;
@@ -741,7 +745,7 @@ void p2m_set_flags_global(struct domain *d, u32 l1e_flags)
        {
            continue;
        }
-       l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
+       l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
 #endif /* now at levels 3 or 4... */
        for ( i3 = 0; 
              i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8); 
@@ -751,7 +755,7 @@ void p2m_set_flags_global(struct domain *d, u32 l1e_flags)
            {
                continue;
            }
-           l2e = map_domain_page(mfn_x(_mfn(l3e_get_pfn(l3e[i3]))));
+           l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
 #endif /* all levels... */
            for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
            {
@@ -759,7 +763,9 @@ void p2m_set_flags_global(struct domain *d, u32 l1e_flags)
                {
                    continue;
                }
-               l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));
+
+                l1mfn = _mfn(l2e_get_pfn(l2e[i2]));
+               l1e = map_domain_page(mfn_x(l1mfn));
                
                for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                {
@@ -769,7 +775,8 @@ void p2m_set_flags_global(struct domain *d, u32 l1e_flags)
                    gfn = get_gpfn_from_mfn(mfn);
                    /* create a new 1le entry using l1e_flags */
                    l1e_content = l1e_from_pfn(mfn, l1e_flags);
-                   paging_write_p2m_entry(d, gfn, &l1e[i1], l1e_content, 1);
+                   paging_write_p2m_entry(d, gfn, &l1e[i1], 
+                                           l1mfn, l1e_content, 1);
                }
                unmap_domain_page(l1e);
            }
index 16a0a681b0601429608c366bb5e40c23cce5de5a..eb015b7a1b97ff23bf0c2c4da5bb429fc1dc5e24 100644 (file)
@@ -2733,11 +2733,11 @@ static int shadow_test_disable(struct domain *d)
  * shadow processing jobs.
  */
 void
-shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p, 
+shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn, 
+                       l1_pgentry_t *p, mfn_t table_mfn, 
                        l1_pgentry_t new, unsigned int level)
 {
     struct domain *d = v->domain;
-    mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
     mfn_t mfn;
     
     shadow_lock(d);
index 12f712001c8f81cf0e3606681888e194aa884483..87e72f0c6dbe3cca8b186fe352c8110642f71fe6 100644 (file)
@@ -392,8 +392,8 @@ void shadow_free_p2m_page(struct domain *d, struct page_info *pg);
 
 /* Functions that atomically write PT/P2M entries and update state */
 void shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn, 
-                            l1_pgentry_t *p, l1_pgentry_t new, 
-                            unsigned int level);
+                            l1_pgentry_t *p, mfn_t table_mfn,
+                            l1_pgentry_t new, unsigned int level);
 int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
                              intpte_t new, mfn_t gmfn);
 int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
index 0f2809fe94eb3739b828a02b1644b10761bfc688..35067b289ba4af44a400ac0016ab9b86f77aa609 100644 (file)
@@ -122,7 +122,8 @@ struct paging_mode {
     void          (*update_cr3            )(struct vcpu *v, int do_locking);
     void          (*update_paging_modes   )(struct vcpu *v);
     void          (*write_p2m_entry       )(struct vcpu *v, unsigned long gfn,
-                                            l1_pgentry_t *p, l1_pgentry_t new, 
+                                            l1_pgentry_t *p, mfn_t table_mfn, 
+                                            l1_pgentry_t new, 
                                             unsigned int level);
     int           (*write_guest_entry     )(struct vcpu *v, intpte_t *p,
                                             intpte_t new, mfn_t gmfn);
@@ -291,17 +292,22 @@ static inline void safe_write_pte(l1_pgentry_t *p, l1_pgentry_t new)
 }
 
 /* Atomically write a P2M entry and update the paging-assistance state 
- * appropriately. */
+ * appropriately. 
+ * Arguments: the domain in question, the GFN whose mapping is being updated, 
+ * a pointer to the entry to be written, the MFN in which the entry resides, 
+ * the new contents of the entry, and the level in the p2m tree at which 
+ * we are writing. */
 static inline void paging_write_p2m_entry(struct domain *d, unsigned long gfn, 
-                                          l1_pgentry_t *p, l1_pgentry_t new, 
-                                          unsigned int level)
+                                          l1_pgentry_t *p, mfn_t table_mfn,
+                                          l1_pgentry_t new, unsigned int level)
 {
     struct vcpu *v = current;
     if ( v->domain != d )
         v = d->vcpu[0];
     if ( likely(v && paging_mode_enabled(d) && v->arch.paging.mode != NULL) )
     {
-        return v->arch.paging.mode->write_p2m_entry(v, gfn, p, new, level);
+        return v->arch.paging.mode->write_p2m_entry(v, gfn, p, table_mfn,
+                                                    new, level);
     }
     else 
         safe_write_pte(p, new);